diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll index 49d7d1a16ca9..8291b14e69c4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -18,92 +18,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i16_nxv16i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv32i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv32i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; 
CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64) @@ -120,534 +48,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i16_nxv16i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i16(i16* 
%base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv16i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv64i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv64i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: 
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv32i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64) @@ -664,160 +78,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i16_nxv16i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), 
v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv16i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64) @@ -834,160 +108,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i16(,, i32*, , , 
i64) - -define @test_vloxseg2_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64) @@ -1004,126 +138,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} 
%2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64) @@ -1140,58 +168,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64( %1, %1, i32* %base, 
%index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv64i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv64i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64) @@ -1208,366 +198,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), 
v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i16(i32* %base, 
%index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg3_nxv4i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64) @@ -1584,165 +228,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 
-} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, 
(a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64) @@ -1759,130 +260,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64) @@ -1899,60 +292,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv64i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv64i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + 
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64) @@ -1969,377 +323,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i8(,,, i32*, , , i64) - -define 
@test_vloxseg3_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = 
extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu 
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: 
vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64) @@ -2356,170 +355,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv4i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64) @@ -2536,134 +388,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i32(,,,, i32*, , , i64) - -define 
@test_vloxseg4_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64) @@ -2680,60 +421,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv64i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv64i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32*, , i64) @@ -2752,314 +454,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64) @@ -3076,92 +487,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: 
vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64) @@ -3178,534 +517,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i32(,, i8*, , , 
i64) - -define @test_vloxseg2_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - 
%3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv64i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv64i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; 
CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64) @@ -3722,92 +547,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i16: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64) @@ -3824,95 +577,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64) @@ -3929,550 +608,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; 
CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: 
vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv64i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv64i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret 
%1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg3_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i32( %1, %1, %1, i8* %base, 
%index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64) @@ -4489,95 +640,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; 
CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64) @@ -4594,98 +671,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: 
vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64) @@ -4702,566 +704,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 
1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v 
v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv64i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv64i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; 
CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64) @@ -5278,234 +737,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg4_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv16i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv16i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv32i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv32i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv32i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv16i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv16i8( %1, %1, i64* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64*, , i64)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64)
@@ -5522,22 +769,18 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64( %1, %1, i64* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
 }
 declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64*, , i64)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64)
@@ -5556,92 +799,20 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32( %1, %1, i64* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i16(i64*, , i64)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i16(,, i64*, , , i64)
-
-define @test_vloxseg2_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
-define @test_vloxseg2_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL:
test_vloxseg2_mask_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64) @@ -5658,228 +829,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, 
i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv64i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv64i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv64i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv4i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64*, , i64) declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64) @@ -5896,368 +859,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv8i32( %1, %1, i64* %base, %index, 
%mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv32i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv32i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv32i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv16i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv16i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv2i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv16i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv16i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv32i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv32i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv32i16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv32i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv16i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv16i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64) @@ -6274,23 +889,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; 
CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
 }
 declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64*, , i64)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64)
@@ -6309,95 +921,22 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i16(i64*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i16(,,, i64*, , , i64)
-
-define @test_vloxseg3_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
-define @test_vloxseg3_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i8(i64*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i8(,,, i64*, , , i64)
-
-define @test_vloxseg3_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv4i8:
-; CHECK: #
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64) @@ -6414,235 +953,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} 
@llvm.riscv.vloxseg3.nxv1i64.nxv8i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i64(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv64i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv64i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv64i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv4i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i64(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64) @@ -6659,379 +985,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu 
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv8i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv32i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv32i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv32i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv16i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv16i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i64(i64*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i64(,,, i64*, , , i64) - -define @test_vloxseg3_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv2i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv16i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv32i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv32i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: 
vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv16i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64) @@ -7048,24 +1017,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: 
vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64*, , i64) @@ -7084,98 +1050,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv4i8: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64) @@ -7192,242 +1083,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i32( %1, %1, %1, 
%1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i64(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv64i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv64i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: 
vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i64(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64) @@ -7444,390 +1116,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i64_nxv1i8( %val, i64* 
%base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv32i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv32i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv16i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i64(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv16i16(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv32i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv32i16(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i32(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i32(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv16i8(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64) @@ -7844,25 +1149,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64*, , i64) @@ -7881,101 +1183,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i16(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv8i16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i8(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64) @@ -7992,249 +1217,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i32(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i32(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: 
vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i8(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i64(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i64(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv64i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv64i8(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i16(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i64(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i64(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64) @@ -8251,401 +1251,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i8(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, i64* %base, %index, 
%mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i32(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i32(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv32i8(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv32i8(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i32(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv16i32(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv16i32: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i16(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i16(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i64(i64*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i64(,,,,, i64*, , , i64) - -define @test_vloxseg5_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i16(i64*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1i64.nxv16i16(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv32i16(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv32i16(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i32(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i32(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv16i8(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64) @@ -8662,26 +1285,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64*, , i64) @@ -8700,104 +1320,25 @@ 
entry: ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i16(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i16(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i8(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64) @@ -8814,256 +1355,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i32(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i32(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i8(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: 
vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i64(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i64(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv64i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv64i8(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i16(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i16(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i64(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i64(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64) @@ -9080,412 +1390,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i64_nxv1i8( %val, i64* %base, 
%index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i8(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i32(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i32(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv32i8(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv32i8(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i32(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv16i32(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i16(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i16(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, 
v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i64(i64*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i64(,,,,,, i64*, , , i64) - -define @test_vloxseg6_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv16i16(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, 
v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv32i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv32i16(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i32(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i32(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv16i8(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg7_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64) @@ -9502,11 +1425,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -9514,15 +1436,13 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i64(i64* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64*, , i64) @@ -9541,11 +1461,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -9553,95 +1472,15 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i32(i64* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i16(,,,,,,, i64*, , , i64) - -define 
@test_vloxseg7_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i8(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64) @@ -9658,11 +1497,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ 
-9670,251 +1508,15 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i16(i64* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i32(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i32(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i8(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i64(i64*, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i64(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv64i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv64i8(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i16(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i64(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i64(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64) @@ -9931,11 +1533,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -9943,411 +1544,15 @@ define @test_vloxseg7_mask_nxv1i64_nxv1i8(i64* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i8(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i32(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i32(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv32i8(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv32i8(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, 
a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i32(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv16i32(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i16(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i16(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, 
%1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i64(i64*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i64(,,,,,,, i64*, , , i64) - -define @test_vloxseg7_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i16(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv16i16(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv32i16(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv32i16(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i32(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i32(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv16i8(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; 
CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64) @@ -10364,28 +1569,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64*, , i64) @@ -10404,51 +1606,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i16(i64*, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i16(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -10457,57 +1618,15 @@ define @test_vloxseg8_mask_nxv1i64_nxv8i16(i64* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i8(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64) @@ -10524,270 +1643,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: 
vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i32(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i32(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i8(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v 
v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i64(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i64(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv64i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv64i8(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i16(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i16(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i64(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i64(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64) @@ -10804,251 +1680,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i8(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i32(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i32(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv8i32(i64* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv32i8(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv32i8(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i32(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv16i32(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i16(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i16(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg8_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i64(i64*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i64(,,,,,,,, i64*, , , i64) - -define @test_vloxseg8_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -11057,151 +1692,13 @@ define @test_vloxseg8_mask_nxv1i64_nxv2i64(i64* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32*, , i64) @@ -11220,22 +1717,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, , i64) @@ -11254,92 +1747,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64) @@ -11356,228 +1777,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv64i8(i32*, , 
i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv64i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue 
{,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32*, , i64)
declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64)
@@ -11594,368 +1807,20 @@ entry:
ret %1
}
-define @test_vloxseg2_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i8(i32*, , i64)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i8(,, i32*, , , i64)
-
-define @test_vloxseg2_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl)
%1 = extractvalue {,} %0, 1
ret %1
}
-define @test_vloxseg2_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i32(i32*, , i64)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i32(,, i32*, , , i64)
-
-define @test_vloxseg2_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v
v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32*, , i64) declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64)
@@ -11972,23 +1837,20 @@ entry:
ret %1
}
-define @test_vloxseg3_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
}
declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32*, , i64)
@@ -12007,95 +1869,22 @@ entry:
ret %1
}
-define @test_vloxseg3_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i16(i32*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i16(,,, i32*, , , i64)
-
-define @test_vloxseg3_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
-define @test_vloxseg3_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i8(i32*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i8(,,, i32*, , , i64)
-
-define @test_vloxseg3_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32*, , i64)
declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64)
@@ -12112,235 +1901,22 @@ entry:
ret %1
}
-define @test_vloxseg3_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i32(i32*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i32(,,, i32*, , , i64)
-
-define @test_vloxseg3_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
-define @test_vloxseg3_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,}
@llvm.riscv.vloxseg3.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv64i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv64i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv64i8: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64) @@ -12357,379 +1933,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; 
CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i8(,,, i32*, , 
, i64) - -define @test_vloxseg3_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64) @@ -12746,24 
+1965,21 @@ entry:
ret %1
}
-define @test_vloxseg4_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
}
declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32*, , i64)
@@ -12782,98 +1998,23 @@ entry:
ret %1
}
-define @test_vloxseg4_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i16(i32*, , i64)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i16(,,,, i32*, , , i64)
-
-define @test_vloxseg4_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl)
%1 = extractvalue {,,,} %0, 1
ret %1
}
-define @test_vloxseg4_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, i32*
%base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64) @@ -12890,242 +2031,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv64i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv64i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64) @@ -13142,390 +2064,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; 
CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, i32* 
%base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64) @@ -13542,25 +2097,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32*, , i64) @@ -13579,101 +2131,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64) @@ -13690,249 +2165,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, i32* %base, %index, 
%mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i64(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg5_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv64i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv64i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i64(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64) @@ -13949,401 +2199,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v 
v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv16i32: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i64(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64) @@ -14360,26 +2233,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - 
%2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32*, , i64) @@ -14398,104 +2268,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i8(i32* %base, %index, i64 
%vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64) @@ -14512,256 +2303,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i32( 
%1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i64(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv64i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv64i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg6_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i64(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64) @@ -14778,412 +2338,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, 
i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i64(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg7_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64) @@ -15200,11 +2373,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15212,15 +2384,13 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i64(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32*, , i64) @@ -15239,11 +2409,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15251,95 +2420,15 @@ 
define @test_vloxseg7_mask_nxv1i32_nxv1i32(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32*, , i64) declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64) @@ -15356,11 +2445,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15368,251 +2456,15 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i16(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i64(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv64i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv64i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i16(,,,,,,, i32*, , , i64) - 
-define @test_vloxseg7_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i64(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64) @@ -15629,11 +2481,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15641,411 +2492,15 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i8(i32* %base, ,,,,,,} 
@llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv32i8(i32* 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; 
CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i64(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64) @@ -16062,28 +2517,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32*, , i64) @@ -16102,51 +2554,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v 
v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -16155,57 +2566,15 @@ define @test_vloxseg8_mask_nxv1i32_nxv8i16(i32* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare 
{,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64) @@ -16222,270 +2591,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i64(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv64i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv64i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i64(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64) @@ -16502,251 +2628,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg8_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i64(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -16755,219 +2640,13 @@ define @test_vloxseg8_mask_nxv1i32_nxv2i64(i32* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 
-} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16*, , i64) @@ -16986,126 +2665,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64) @@ -17122,126 +2695,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define 
@test_vloxseg2_mask_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv64i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv64i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64) @@ -17258,92 +2725,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i16_nxv8i64( %val, i16* %base, %index, i64 %vl, %mask) { ; 
CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64) @@ -17360,370 +2755,20 @@ entry: ret %1 } -define 
@test_vloxseg2_mask_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv8i16.nxv2i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv8i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64) @@ -17740,130 +2785,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i16_nxv8i16( %val, i16* 
%base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i32(i16*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64) @@ -17880,130 +2817,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail 
call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv64i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv64i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64) @@ -18020,95 +2849,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i16_nxv8i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t +; 
CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64) @@ -18125,381 +2880,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: 
vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i16(,,, i16*, , , i64) - 
-define @test_vloxseg3_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg4_mask_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16*, , i64) declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64) @@ -18516,134 +2911,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; 
CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64) @@ -18660,134 +2944,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv64i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv64i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} 
@llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64) @@ -18804,98 +2977,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i16_nxv8i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; 
CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64) @@ -18912,238 +3009,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - 
%3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64) @@ -19160,160 +3042,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %1, %1, i8* %base, 
%index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64) @@ -19330,126 +3072,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64) @@ -19466,58 +3102,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv64i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv64i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64) @@ -19534,366 +3132,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv8i64(i8* 
%base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} 
@llvm.riscv.vloxseg3.nxv4i8.nxv32i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64) @@ -19910,165 +3162,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 
= tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64) @@ -20085,130 +3193,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - 
ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64) @@ -20225,60 +3225,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv64i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv64i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64) @@ -20295,377 +3256,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i8(i8*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i32(i8* %base, %index, 
i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v 
v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64) @@ -20682,170 +3288,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i64(i8*, , i64) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64) @@ -20862,134 +3321,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64) @@ -21006,62 +3354,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv64i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv64i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; 
CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64) @@ -21078,388 +3386,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: 
vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: 
vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64) @@ -21476,175 +3419,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv8i16(i8* %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64) @@ -21661,138 +3453,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64) @@ -21809,64 +3487,23 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v 
v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv64i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv64i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64) @@ -21883,399 +3520,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i64(,,,,, i8*, , , i64) - -define 
@test_vloxseg5_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v 
v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64) @@ -22292,180 +3554,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64) @@ -22482,142 +3589,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: 
vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare 
{,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64) @@ -22634,66 +3624,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv64i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv64i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64) @@ -22710,410 +3659,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i8: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; 
CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64) @@ -23130,11 +3694,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -23142,173 +3705,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 
%vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64) @@ -23325,11 +3730,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -23337,134 +3741,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64) @@ -23481,11 +3766,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -23493,56 +3777,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i64(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv64i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv64i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64) @@ -23559,11 +3802,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -23571,409 +3813,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; 
CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i64(,,,,,,, i8*, , , i64) - 
-define @test_vloxseg7_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv32i16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64) @@ -23990,190 +3838,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; 
CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i16(i8*, , i64) -declare 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64) @@ -24190,150 +3875,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: 
vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64) @@ -24350,51 +3912,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv64i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv64i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vmv1r.v v17, v16 ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 @@ -24403,15 +3924,13 @@ define @test_vloxseg8_mask_nxv4i8_nxv64i8(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8*, , i64) @@ -24430,331 +3949,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg8_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg8_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -24763,151 +3961,13 @@ define @test_vloxseg8_mask_nxv4i8_nxv2i64(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) 
{ -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} 
%2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16*, , i64) @@ -24926,22 +3986,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16*, , i64) @@ -24960,92 +4016,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64) @@ -25062,228 +4046,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv64i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv64i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64) @@ -25300,368 +4076,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv1i16.nxv2i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v 
v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64) @@ -25678,23 +4106,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16*, , i64) @@ -25713,95 +4138,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} 
%0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64) @@ -25818,235 +4170,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv64i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv64i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64) @@ -26063,379 +4202,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - 
%2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv32i8: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; 
CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64) @@ -26452,24 +4234,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, 
v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16*, , i64) @@ -26488,98 +4267,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i8(i16* 
%base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64) @@ -26596,242 +4300,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i8(,,,, i16*, , 
, i64) - -define @test_vloxseg4_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv64i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv64i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64) @@ -26848,390 +4333,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; 
CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i8(,,,, i16*, , , i64) - -define 
@test_vloxseg4_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call 
{,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64) @@ -27248,25 +4366,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16*, , i64) @@ -27285,101 +4400,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: 
vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64) @@ -27396,249 +4434,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i32(i16* 
%base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i64(i16* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv64i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv64i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - 
ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64) @@ -27655,401 +4468,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i32(i16*, , i64) -declare 
{,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg6_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64) @@ -28066,26 +4502,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16*, , i64) @@ -28104,104 +4537,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64) @@ -28218,256 +4572,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 
= tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv64i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv64i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64) @@ -28484,412 +4607,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - 
%1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64) @@ -28906,11 +4642,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -28918,15 +4653,13 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i64(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16*, , i64) @@ -28945,11 +4678,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -28957,95 +4689,15 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i32(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i16(,,,,,,, i16*, , , i64) - -define 
@test_vloxseg7_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64) @@ -29062,11 +4714,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v 
v4, v1 @@ -29074,251 +4725,15 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i16(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i64(i16*, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv64i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv64i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64) @@ -29335,11 +4750,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -29347,411 +4761,15 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i8(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64) @@ -29768,28 +4786,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16*, , i64) @@ -29808,51 +4823,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - 
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -29861,57 +4835,15 @@ define @test_vloxseg8_mask_nxv1i16_nxv8i16(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64) @@ -29928,270 +4860,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: 
vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv64i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv64i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64) @@ -30208,251 +4897,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, 
%index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i16(i16*, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -30461,321 +4909,13 @@ define @test_vloxseg8_mask_nxv1i16_nxv2i64(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; 
CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 
- ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; 
CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, 
%index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32*, , i64) @@ -30794,228 +4934,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = 
tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv64i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv64i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: 
vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64) @@ -31032,126 +4964,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu 
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64) @@ -31168,22 +4994,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32*, , i64) @@ -31202,339 +5024,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} 
@llvm.riscv.vloxseg3.nxv2i32.nxv1i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64) @@ -31551,235 +5054,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv64i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv64i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i16(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i16(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i64(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i64(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i8(i32*, , i64) 
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64) @@ -31796,130 +5086,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail 
call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i8(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i8(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i32(i32*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i32(,,, i32*, , , i64) - -define @test_vloxseg3_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64) @@ -31936,23 +5118,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32*, , i64) @@ -31971,349 +5150,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv2i32.nxv1i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64) @@ -32330,242 +5181,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv64i8(i32*, , 
i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv64i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i16(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i16(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i64(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i64(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, 
a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64) @@ -32582,134 +5214,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define 
@test_vloxseg4_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i8(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i8(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i32(i32*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i32(,,,, i32*, , , i64) - -define @test_vloxseg4_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32*, , i64) declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64) @@ -32726,24 +5247,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32*, , i64) @@ -32762,359 +5280,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i8(i32* 
%base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i64(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i32( %1, 
%1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64) @@ -33131,249 +5313,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i64(,,,,, i32*, , , i64) - -define 
@test_vloxseg5_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv64i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv64i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i16(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i16(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i64(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i64(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64) @@ -33390,138 +5347,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; 
CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i8(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i8(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i32(i32*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i32(,,,,, i32*, , , i64) - -define @test_vloxseg5_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64) @@ -33538,25 +5381,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32*, , i64) @@ -33575,369 +5415,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; 
CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i32(i32*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i64(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu 
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv2i32.nxv4i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64) @@ -33954,256 +5449,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, 
a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i64(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv64i8(i32*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i32.nxv64i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i16(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i16(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i64(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i64(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu 
-; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64) @@ -34220,142 +5484,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i8(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i8(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i32(i32*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i32(,,,,,, i32*, , , i64) - -define @test_vloxseg6_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: 
vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64) @@ -34372,26 +5519,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32*, , i64) @@ -34410,379 +5554,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i64(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i32.nxv1i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv4i8(i32* 
%base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64) @@ -34799,11 +5589,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -34811,251 +5600,15 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i32(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i64(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv64i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv64i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i16(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i16(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i64(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i64(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = 
tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64) @@ -35072,11 +5625,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -35084,134 +5636,15 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i8(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i8(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i8(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i32(i32*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i32(,,,,,,, i32*, , , i64) - -define @test_vloxseg7_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, 
%1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64) @@ -35228,11 +5661,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -35240,15 +5672,13 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i16(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32*, , i64) @@ -35267,11 +5697,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -35279,377 +5708,15 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i64(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i8(i32*, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i64(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - 
-define @test_vloxseg8_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu 
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64) @@ -35666,270 +5733,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i8(i32*, , 
i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i64(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv64i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv64i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i16(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i16(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i64(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i64(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v 
v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64) @@ -35946,150 +5770,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, 
%1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i8(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i8(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i32(i32*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i32(,,,,,,,, i32*, , , i64) - -define @test_vloxseg8_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg8_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64) @@ -36106,28 +5807,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32*, , i64) @@ -36146,232 +5844,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; 
CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i32(i8*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i64( %1, %1, 
i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8*, , i64) @@ -36390,126 +5881,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v 
v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64) @@ -36526,126 +5911,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv64i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv64i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i16: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64) @@ -36662,92 +5941,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv2i8(i8* %base, %index, 
i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64) @@ -36764,370 +5971,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: 
vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} 
@llvm.riscv.vloxseg3.nxv8i8.nxv16i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 
= tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64) @@ -37144,130 +6001,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu 
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64) @@ -37284,130 +6032,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define 
@test_vloxseg3_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv64i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv64i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64) @@ -37424,95 +6064,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg3_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} 
@llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64) @@ -37529,381 +6095,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64) @@ -37920,134 +6126,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v 
v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64) @@ -38064,134 +6159,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define 
@test_vloxseg4_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv64i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv64i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64) @@ -38208,98 
+6192,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64) @@ -38316,392 +6224,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv16i32(i8* %base, 
%index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv16i16(i8* %base, 
%index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: 
vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64) @@ -38718,138 +6256,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare 
{,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64) @@ -38866,138 +6290,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv64i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv64i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64) @@ -39014,101 +6324,23 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64) @@ -39125,403 +6357,23 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define 
@test_vloxseg5_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv8i8.nxv2i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; 
CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i64(i8*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64) @@ -39538,142 +6390,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, 
(a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg6_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64) @@ -39690,142 +6425,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv64i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv64i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64) @@ -39842,104 +6460,24 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i64: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t 
-; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64) @@ -39956,414 +6494,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i64(i8* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: 
vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64) @@ -40380,11 +6529,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -40392,134 +6540,15 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } 
-define @test_vloxseg7_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i32(i8* %base, %index, 
i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64) @@ -40536,11 +6565,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -40548,134 +6576,15 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv64i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv64i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64) @@ -40692,107 +6601,25 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg7ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, 
i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64) @@ -40809,11 +6636,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; 
CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -40821,413 +6647,15 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 
1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv16i16(i8* 
%base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i32(,,,,,,,, i8*, , , i64) - -define 
@test_vloxseg8_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64) @@ -41244,150 +6672,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 
%vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu 
-; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64) @@ -41404,150 +6709,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 
= extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv64i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv64i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64) @@ -41564,110 +6746,26 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: 
vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vloxseg8ei64.v v7, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: 
vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64) @@ -41684,91 +6782,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv16i32(i8* %base, 
%index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vmv1r.v v17, v16 ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 @@ -41777,165 +6794,15 @@ define @test_vloxseg8_mask_nxv8i8_nxv16i32(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv16i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv16i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv32i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv32i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv32i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64) @@ -41952,160 +6819,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg2_mask_nxv4i64_nxv4i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv16i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv16i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv1i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i32(i64*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv1i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64) @@ -42122,126 +6849,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i64_nxv4i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare 
{,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv1i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv2i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; 
CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64) @@ -42258,58 +6879,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i64_nxv4i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv64i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv64i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv64i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv64i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64) @@ -42326,364 +6909,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i64_nxv4i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: 
# kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv1i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv1i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv2i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define 
@test_vloxseg2_mask_nxv4i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv8i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv32i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv32i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv32i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv16i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: 
vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv16i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv2i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv4i64_nxv2i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv2i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i16(i16*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64) @@ -42700,160 +6939,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - 
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64) @@ -42870,126 +6969,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: 
vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64) @@ -43006,58 +6999,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv64i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv64i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64) @@ -43074,366 +7029,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, 
%val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i16(i16*, , i64) 
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i16(i16* %base, 
%index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64) @@ -43450,165 +7059,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64) @@ -43625,130 +7090,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv2i32(i16* 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64) @@ -43765,60 +7122,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 
- ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv64i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv64i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64) @@ -43835,377 +7153,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v 
v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv8i32(i16* %base, %index, 
i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64) @@ -44222,170 +7185,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: 
vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64) @@ -44402,134 +7218,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 
-} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64) @@ -44546,62 +7251,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv64i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv64i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64) @@ -44618,388 +7283,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, 
i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv4i16.nxv8i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64) @@ -45016,175 +7316,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv4i16.nxv16i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64) @@ -45201,138 +7350,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64) @@ -45349,64 +7384,23 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv64i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv64i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64) @@ -45423,399 +7417,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i16_nxv4i16( %val, 
i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - 
%2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv32i8(i16* %base, 
%index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv4i16.nxv2i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64) @@ -45832,180 +7451,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i64(i16*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64) @@ -46022,142 +7486,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i32(,,,,,, i16*, , , i64) - 
-define @test_vloxseg6_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64) @@ -46174,66 +7521,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: 
vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv64i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv64i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64) @@ -46250,410 +7556,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: 
vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: 
vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64) @@ 
-46670,11 +7591,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -46682,173 +7602,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i32(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: 
vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64) @@ -46865,11 +7627,10 @@ entry: ret %1 } -define 
@test_vloxseg7_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -46877,134 +7638,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i8(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64) @@ -47021,11 +7663,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -47033,56 +7674,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i64(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv64i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv64i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define 
@test_vloxseg7_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64) @@ -47099,11 +7699,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -47111,409 +7710,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i16(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4i16.nxv1i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 
= extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64) @@ -47530,190 +7735,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: 
vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64) @@ 
-47730,150 +7772,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64) @@ -47890,51 +7809,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 
%vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv64i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv64i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vmv1r.v v17, v16 ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 @@ -47943,15 +7821,13 @@ define @test_vloxseg8_mask_nxv4i16_nxv64i8(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16*, , i64) @@ -47970,331 +7846,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i32(,,,,,,,, i16*, , , 
i64) - -define @test_vloxseg8_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv2i64(i16* %base, 
%index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -48303,151 +7858,13 @@ define @test_vloxseg8_mask_nxv4i16_nxv2i64(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8*, , i64) @@ -48466,22 +7883,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8*, , i64) @@ -48500,92 
+7913,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} 
@llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64) @@ -48602,228 +7943,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i8( 
%1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv64i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv64i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64) @@ -48840,368 +7973,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v 
v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - 
-define @test_vloxseg3_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg3_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64) @@ -49218,23 +8003,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8*, , i64) @@ -49253,95 +8035,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64) @@ -49358,235 +8067,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i32(i8*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv64i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv64i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64) @@ -49603,379 +8099,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i32(i8* 
%base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv2i16(i8* 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} 
%0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64) @@ -49992,24 +8131,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8*, , i64) @@ -50028,98 +8164,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} 
%0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64) @@ -50136,242 +8197,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv64i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv64i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64) @@ -50388,390 +8230,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} 
- -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v 
v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu 
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64) @@ -50788,25 +8263,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8*, , i64) @@ -50825,101 +8297,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i16(,,,,, i8*, , , i64) 
- -define @test_vloxseg5_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64) @@ -50936,249 +8331,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv64i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv64i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64) @@ -51195,401 +8365,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: 
vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg5_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64) @@ -51606,26 +8399,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, 
%index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8*, , i64) @@ -51644,104 +8434,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg6_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64) @@ -51758,256 +8469,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i8(i8*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv64i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv64i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , 
i64) @@ -52024,412 +8504,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli 
a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: 
vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv16i8(i8* 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64) @@ -52446,11 +8539,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -52458,15 +8550,13 @@ define @test_vloxseg7_mask_nxv1i8_nxv1i64(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8*, , i64) @@ -52485,11 +8575,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -52497,95 +8586,15 @@ define @test_vloxseg7_mask_nxv1i8_nxv1i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i16(,,,,,,, i8*, , , i64) - -define 
@test_vloxseg7_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64) @@ -52602,11 +8611,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -52614,251 +8622,15 @@ define 
@test_vloxseg7_mask_nxv1i8_nxv1i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i64(,,,,,,, i8*, , , i64) - -define 
@test_vloxseg7_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv64i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv64i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: 
vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64) @@ -52875,11 +8647,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -52887,411 +8658,15 @@ define @test_vloxseg7_mask_nxv1i8_nxv1i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v 
v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i64(,,,,,,, i8*, , , 
i64) - -define @test_vloxseg7_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv32i16: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64) @@ -53308,28 +8683,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8*, , i64) @@ -53348,51 +8720,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -53401,57 +8732,15 @@ define @test_vloxseg8_mask_nxv1i8_nxv8i16(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64) @@ -53468,270 +8757,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: 
vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv64i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv64i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64) @@ -53748,251 +8794,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg8_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -54001,321 +8806,13 @@ define @test_vloxseg8_mask_nxv1i8_nxv2i64(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv2i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 
0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8*, , i64) @@ -54334,228 +8831,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv64i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv64i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - 
-define @test_vloxseg2_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64) @@ -54572,126 +8861,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64) @@ -54708,22 +8891,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8*, , i64) @@ -54742,339 +8921,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg2_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i32(i8*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64) @@ -55091,235 +8951,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define 
@test_vloxseg3_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv64i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv64i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i16(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i16(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i64(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i64(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - 
ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64) @@ -55336,130 +8983,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i8(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i8(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 
= extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i32(i8*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i32(,,, i8*, , , i64) - -define @test_vloxseg3_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64) @@ -55476,23 +9015,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8*, , i64) @@ -55511,349 +9047,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i32(i8* 
%base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i32(i8*, , i64) 
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64) @@ -55870,242 +9078,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv64i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv64i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i16(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i16(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg4_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i64(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i64(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 
= extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64) @@ -56122,134 +9111,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i8(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i8(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 
-; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i32(i8*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i32(,,,, i8*, , , i64) - -define @test_vloxseg4_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64) @@ -56266,24 +9144,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8*, , i64) @@ -56302,359 +9177,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; 
CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i32(i8*, , i64) -declare {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 
-; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: 
vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64) @@ -56671,249 +9210,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv2i8.nxv8i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv64i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv64i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i16(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i16(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i64(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i64(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i8(,,,,, i8*, , , i64) - -define 
@test_vloxseg5_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64) @@ -56930,138 +9244,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i8(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i8(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i32(i8*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i32(,,,,, i8*, , , i64) - -define @test_vloxseg5_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64) @@ -57078,25 +9278,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v 
v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8*, , i64) @@ -57115,369 +9312,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i16(i8*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i16(i8*, , i64) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64) @@ -57494,256 +9346,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg6_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv64i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv64i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i16(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i16(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, 
v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i64(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i64(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64) @@ -57760,142 +9381,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i8(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i8(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i32(i8*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i32(,,,,,, i8*, , , i64) - -define @test_vloxseg6_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64) @@ -57912,26 +9416,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8*, , i64) @@ -57950,379 +9451,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; 
CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 
= tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64) @@ -58339,11 +9486,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -58351,251 +9497,15 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv64i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv64i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i16(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i16(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i64(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i64(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 
= tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64) @@ -58612,11 +9522,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -58624,134 +9533,15 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i32(i8*, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i8(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i8(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i32(i8*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i32(,,,,,,, i8*, , , i64) - -define @test_vloxseg7_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg7_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64) @@ -58768,11 +9558,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -58780,15 +9569,13 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8*, , i64) @@ -58807,11 +9594,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -58819,377 +9605,15 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i64(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + 
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; 
CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, 
i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64) @@ -59206,270 +9630,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv64i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv64i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i16(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i16(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i64(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i64(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64) @@ -59486,150 +9667,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 
+; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i8(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i8(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) - 
%1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i32(i8*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i32(,,,,,,,, i8*, , , i64) - -define @test_vloxseg8_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64) @@ -59646,28 +9704,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8*, , i64) @@ -59686,232 
+9741,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv16i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv32i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; 
CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv4i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv16i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv1i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv8i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv1i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32*, , i64) @@ -59930,126 +9778,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i32_nxv8i16( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv4i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv1i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv2i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, 
i32*, , , i64) @@ -60066,126 +9808,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i32_nxv8i8( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv4i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv64i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv64i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv64i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv64i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 
%vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv4i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64) @@ -60202,92 +9838,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i32_nxv8i64( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv1i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i32.nxv1i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv2i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64) @@ -60304,194 +9868,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i32_nxv8i32( %val, i32* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i8(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i8(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv32i8(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; 
CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i8(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i32(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i32(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv16i32(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i32(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i16(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i16(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv2i16(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i16(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i64(i32*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i64(,, i32*, , , i64) - -define @test_vloxseg2_nxv8i32_nxv2i64(i32* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i64(i32* 
%base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i64(i32* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv16i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64) @@ -60508,568 +9898,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv32i8_nxv32i16( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv4i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv16i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv1i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv1i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv32i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv8i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv4i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv32i8.nxv1i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv1i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv2i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv8i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i8(i8* %base, %index, i64 %vl) - %1 = 
extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv4i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv64i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv64i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv64i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv64i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv4i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; 
CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv8i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv1i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i8(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i8(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv2i8(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv8i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64) @@ -61086,432 +9928,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv32i8_nxv32i8( %val, i8* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i32(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i32(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv16i32(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret 
%1 } -define @test_vloxseg2_mask_nxv32i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i32(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i16(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i16(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv2i16(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i16(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i64(i8*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i64(,, i8*, , , i64) - -define @test_vloxseg2_nxv32i8_nxv2i64(i8* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i64(i8* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; 
CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i8(i16*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = 
tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: 
vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64) @@ -61528,228 +9958,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, 
a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv64i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv64i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i16(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i16(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i64(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i64(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i64) @@ -61766,126 +9988,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i8(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i8(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i32(i16*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i32(,, i16*, , , i64) - -define @test_vloxseg2_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16*, , i64) declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i64)
@@ -61902,22 +10018,18 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg2_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
 }
 declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16*, , i64)
@@ -61936,339 +10048,20 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg2_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i16(i16*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i16(,,, i16*, , , i64)
-
-define @test_vloxseg3_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 1
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,} %0, 1
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i16( %1,
%1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i16(i16* 
%base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64) @@ -62285,235 
+10078,22 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i8(i16*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i8(,,, i16*, , , i64)
-
-define @test_vloxseg3_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i64(i16*, , i64)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i64(,,, i16*, , , i64)
-
-define @test_vloxseg3_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i64( %1, %1, %1, i16*
%base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv64i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv64i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i16(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i16(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i64(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i64(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64) @@ -62530,130 +10110,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i8(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i8(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i32(i16*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i32(,,, i16*, , , i64) - -define @test_vloxseg3_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64) @@ -62670,23 +10142,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16*, , i64) @@ -62705,349 +10174,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 
-} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64) @@ -63064,242 +10205,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv64i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv64i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i16(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i16(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i64(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i64(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64) @@ -63316,134 +10238,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i32(,,,, i16*, 
, , i64) - -define @test_vloxseg4_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i8(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i8(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i32(i16*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i32(,,,, i16*, , , i64) - -define @test_vloxseg4_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, 
v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64) @@ -63460,24 +10271,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16*, , i64) @@ -63496,359 +10304,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define 
@test_vloxseg5_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i16(,,,,, i16*, , , 
i64) - -define @test_vloxseg5_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64) @@ -63865,249 +10337,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, 
v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv64i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv64i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i16(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i16(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i64(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i64(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64) @@ -64124,138 +10371,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i8(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i8(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv32i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i32(i16*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i32(,,,,, i16*, , , i64) - -define @test_vloxseg5_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64) @@ -64272,25 +10405,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16*, , i64) @@ -64309,369 +10439,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i16_nxv2i64(i16* 
%base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i64: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; 
CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64) @@ -64688,256 +10473,25 @@ entry: ret %1 } -define 
@test_vloxseg6_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli 
a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv64i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv64i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i16(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i16(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i64(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i64(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i64: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64) @@ -64954,142 +10508,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, 
i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i8(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i8(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i32(i16*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i32(,,,,,, i16*, , , i64) - -define @test_vloxseg6_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu 
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64) @@ -65106,26 +10543,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16*, , i64) @@ -65144,379 +10578,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; 
CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i8(i16*, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64) @@ -65533,11 +10613,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -65545,251 +10624,15 @@ define 
@test_vloxseg7_mask_nxv2i16_nxv2i32(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv64i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv64i8(,,,,,,, 
i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i16(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i16(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i64(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i64(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64) @@ -65806,11 +10649,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -65818,134 +10660,15 @@ define @test_vloxseg7_mask_nxv2i16_nxv2i8(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; 
CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i8(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i8(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i32(i16*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i32(,,,,,,, i16*, , , i64) - -define @test_vloxseg7_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: 
vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64) @@ -65962,11 +10685,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -65974,15 +10696,13 @@ define @test_vloxseg7_mask_nxv2i16_nxv2i16(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16*, , i64) @@ -66001,11 +10721,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -66013,377 +10732,15 @@ define @test_vloxseg7_mask_nxv2i16_nxv2i64(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = 
extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v 
v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - 
ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64) @@ -66400,270 +10757,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; 
CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv64i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv64i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i16(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i16(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i64(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i64(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i64: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64) @@ -66680,150 +10794,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i8(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i8(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; 
CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i32(i16*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i32(,,,,,,,, i16*, , , i64) - -define @test_vloxseg8_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64) @@ -66840,28 +10831,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16*, , i64) @@ -66880,334 +10868,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv16i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv16i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv32i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv32i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv32i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv16i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv16i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv4i8(i64* %base, %index, i64 
%vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64*, , i64) @@ -67226,228 +10905,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv2i64.nxv8i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv64i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv64i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv64i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i16(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i16(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv4i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i64(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i64(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv1i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64) @@ -67464,126 +10935,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv8i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv32i8(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv32i8(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define 
@test_vloxseg2_mask_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv32i8( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i32(i64*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv16i32(,, i64*, , , i64) - -define @test_vloxseg2_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv16i32( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64) @@ -67600,22 +10965,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64*, , i64) @@ -67634,339 +10995,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64( %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv16i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv16i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv32i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv32i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv32i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i32(i64*, , i64) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv16i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv16i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i64(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv4i8: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i16(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64) @@ -67983,235 +11025,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32( %val, 
%val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i64(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv64i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv64i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv64i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i16(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i16(,,, i64*, , , i64) - 
-define @test_vloxseg3_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv4i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i64(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i64(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv1i8(i64* %base, %index, i64 
%vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv1i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64) @@ -68228,130 +11057,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv8i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv32i8(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv32i8(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: 
vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv32i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i32(i64*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv16i32(,,, i64*, , , i64) - -define @test_vloxseg3_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv16i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64) @@ -68368,23 +11089,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64*, , i64) @@ -68403,349 +11121,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v 
v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv16i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv16i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv32i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv32i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv32i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv4i32: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv16i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv16i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i64(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i64(i64* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64) @@ -68762,242 +11153,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i64(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv64i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv64i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv64i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i16(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i16(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv4i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i64(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i64(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg4_mask_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv1i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64) @@ -69014,134 +11186,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv8i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv32i8(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv32i8(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv32i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i32(i64*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv16i32(,,,, i64*, , , i64) - -define @test_vloxseg4_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv16i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64) @@ -69158,24 +11219,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64*, , i64) @@ -69194,24 +11252,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half*, , i64) @@ -69230,92 +11285,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16f16_nxv16i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i16(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i32(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64) @@ -69332,534 +11315,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16f16_nxv16i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i64(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = 
tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i32(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i16(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i8(,, half*, , , i64) - 
-define @test_vloxseg2_nxv16f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i16(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i32(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i8(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i64(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv64i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv64i8(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, 
a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i16(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i64(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i8(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define 
@test_vloxseg2_mask_nxv16f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i8(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i32(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i8(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64) @@ -69876,160 +11345,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv16f16_nxv16i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i16(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i64(half*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i64(,, half*, , , i64) - -define @test_vloxseg2_nxv16f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i16(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i16(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64) @@ -70046,160 +11375,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i8(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i64(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv1i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; 
CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i32(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv1i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i16(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64) @@ -70216,126 +11405,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; 
CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i16(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv1i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i32(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i8(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv4f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64) @@ -70352,58 +11435,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv64i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv64i8(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv64i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv64i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - 
declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64) @@ -70420,432 +11465,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i64(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i8(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv1i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i8(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i32(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i8(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i32(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i16(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i64(,, double*, , , i64) - -define @test_vloxseg2_nxv4f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i16(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i16(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i32(,, double*, , , i64) - 
-define @test_vloxseg2_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i8(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64) @@ -70862,22 +11495,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double*, , i64) @@ -70896,92 +11525,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i16(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i8(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64) @@ -70998,228 +11555,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i32(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i8(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: 
vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i64(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv64i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv64i8(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv64i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i16(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i16(double* %base, %index, i64 
%vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i64(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64) @@ -71236,368 +11585,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i8(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i32(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i8(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv1f64.nxv16i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i32(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i16(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i64(,, double*, , , i64) - -define @test_vloxseg2_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg3_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64) @@ -71614,23 +11615,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double*, , i64) @@ -71649,95 +11647,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i32( %val, 
double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} 
@llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64) @@ -71754,235 +11679,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i64(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv64i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv64i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv64i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv4i16(double* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i64(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64) @@ -71999,379 +11711,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i8( %1, %1, %1, double* %base, %index, %mask, i64 
%vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i64(,,, double*, , , i64) - -define @test_vloxseg3_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, 
v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; 
CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64) @@ -72388,24 +11743,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double*, , i64) @@ -72424,98 +11776,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64) @@ -72532,242 +11809,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i64(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv64i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv64i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, 
double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i64(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64) @@ -72784,390 +11842,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; 
CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i16(double* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i64(,,,, double*, , , i64) - -define @test_vloxseg4_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i16(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i16(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i16(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i16(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i32(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i32(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i8(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64) @@ -73184,25 +11875,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double*, , i64) @@ -73221,101 +11909,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i16(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i16(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i8(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64) @@ -73332,249 +11943,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i32(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i32(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, 
v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i8(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i64(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i64(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv64i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv64i8(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i16(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i16(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i64(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i64(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64) @@ -73591,401 +11977,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i8(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail 
call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i32(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i32(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i8(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i8(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i32(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i32(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 
-} - -define @test_vloxseg5_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i16(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i16(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i64(double*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i64(,,,,, double*, , , i64) - -define @test_vloxseg5_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, 
%1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i16(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i16(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i32(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i32(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i8(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64) @@ -74002,26 +12011,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double*, , i64) @@ -74040,104 +12046,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i16(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i8(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv4i8(double* %base, %index, 
i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64) @@ -74154,256 +12081,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i32(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i32(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i8(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i64(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i64(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv64i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv64i8(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg6_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i16(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i64(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i64(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64) @@ -74420,412 +12116,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i8(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i32(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i32(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 
-} - -define @test_vloxseg6_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i8(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i8(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i32(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i32(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) 
- %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i16(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i16(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i64(double*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i64(,,,,,, double*, , , i64) - -define @test_vloxseg6_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i16(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i16(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i32(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i32(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, 
v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i8(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64) @@ -74842,11 +12151,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -74854,15 +12162,13 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i64(double* %base, ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double*, , 
i64) @@ -74881,11 +12187,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -74893,95 +12198,15 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i32(double* %base, ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i16(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i8(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64) @@ -74998,11 +12223,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -75010,251 +12234,15 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i16(double* %base, ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i32(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i32(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i32( 
%1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i8(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i64(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i64(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv64i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv64i8(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i16(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i64(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i64(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64) @@ -75271,11 +12259,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -75283,411 +12270,15 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i8(double* %base, < ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i8(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i32(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i32(,,,,,,, double*, , , i64) - -define 
@test_vloxseg7_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i8(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i8(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i32(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i32(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv16i32: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i16(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i16(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i64(double*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i64(,,,,,,, double*, , , i64) - -define @test_vloxseg7_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} 
%0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i16(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i16(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i32(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i32(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i8(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64) @@ -75704,28 +12295,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v 
v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double*, , i64) @@ -75744,51 +12332,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i16(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -75797,57 +12344,15 @@ define @test_vloxseg8_mask_nxv1f64_nxv8i16(double* %base, ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i8(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64) @@ -75864,270 +12369,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 
-} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i32(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i32(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i8(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i64(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i64(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv64i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv64i8(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i16(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i64(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i64(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64) @@ -76144,251 +12406,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - 
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i8(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i32(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i32(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i8(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i8(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i32(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i32(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i16(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i16(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 
-; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i64(double*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i64(,,,,,,,, double*, , , i64) - -define @test_vloxseg8_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -76397,321 +12418,13 @@ define @test_vloxseg8_mask_nxv1f64_nxv2i64(double* %base, ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i16(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - 
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i16(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i32(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i8(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i64(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i32(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i16(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i8(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i16(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float*, , i64) @@ -76730,228 +12443,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg2_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i8(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i64(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv64i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv64i8(,, float*, , , i64) - -define 
@test_vloxseg2_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i16(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i64(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i8(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64) @@ -76968,126 +12473,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i32(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i8(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i32(,, float*, , , i64) - -define @test_vloxseg2_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64) @@ -77104,22 +12503,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float*, , i64) @@ -77138,339 +12533,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i64(float*, , i64) 
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64) @@ -77487,235 +12563,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv64i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv64i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: 
vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64) @@ -77732,130 +12595,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64) @@ -77872,23 +12627,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli 
a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float*, , i64) @@ -77907,349 +12659,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 
= extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv8i16(float* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64) 
@@ -78266,242 +12690,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv64i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv64i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - 
-define @test_vloxseg4_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64) @@ -78518,134 +12723,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64) @@ -78662,24 +12756,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float*, , i64) @@ -78698,359 +12789,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv2f32.nxv16i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i64(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i16(,,,,, float*, , , 
i64) - -define @test_vloxseg5_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64) @@ -79067,249 +12822,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: 
vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i64(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv64i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv64i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i64(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64) @@ -79326,138 +12856,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg5_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64) @@ -79474,25 +12890,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 
} declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float*, , i64) @@ -79511,369 +12924,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i64(float*, , i64) 
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i64(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i16: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64) @@ -79890,256 +12958,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i64(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv64i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv64i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 
1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i64(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64) @@ -80156,142 +12993,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v 
v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - 
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64) @@ -80308,26 +13028,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float*, , i64) @@ -80346,379 +13063,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, 
v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} 
%2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i64(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64) @@ -80735,11 +13098,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i32(float* %base, 
%index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -80747,251 +13109,15 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i32(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i64(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv64i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv64i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i64(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64) @@ -81008,11 +13134,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -81020,134 +13145,15 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i8(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - 
%2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64) @@ -81164,11 +13170,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -81176,15 +13181,13 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i16(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float*, , i64) @@ -81203,11 +13206,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -81215,377 +13217,15 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i64(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i16(float*, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; 
CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i64(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: 
vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i16(float* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64) @@ -81602,270 +13242,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i32(float* 
%base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i64(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv64i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv64i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 
-; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i64(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} 
%2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64) @@ -81882,150 +13279,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv32i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64) @@ -82042,28 +13316,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v 
v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float*, , i64) @@ -82082,164 +13353,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i16(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i16(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i32(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i8(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half*, , i64) @@ -82258,22 +13390,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half*, , i64) @@ -82292,92 +13420,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i16(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = 
extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i8(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64) @@ -82394,228 +13450,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i32(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i8(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i64(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv64i8(half*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1f16.nxv64i8(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i16(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i64(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue 
{,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64) @@ -82632,368 +13480,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i8(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i32(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i8(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i32(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i16(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i64(,, half*, , , i64) - -define @test_vloxseg2_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: 
vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64) @@ -83010,23 +13510,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half*, , i64) @@ -83045,95 +13542,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call 
{,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64) @@ -83150,235 +13574,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv64i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv64i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} 
%0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64) @@ -83395,379 +13606,22 @@ entry: ret %1 
} -define @test_vloxseg3_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i32( %1, %1, %1, half* %base, %index, 
%mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: 
vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, half* 
%base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64) @@ -83784,24 +13638,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half*, , i64) @@ -83820,98 +13671,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64) @@ -83928,242 +13704,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv2i32(half* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv64i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv64i8(,,,, half*, , , i64) - 
-define @test_vloxseg4_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64) @@ -84180,390 +13737,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv8i32(half* 
%base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i16(half*, , i64) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; 
CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv16i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64) @@ -84580,25 +13770,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half*, , i64) @@ -84617,101 +13804,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv1f16.nxv8i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64) @@ -84728,249 +13838,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v 
v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i64(,,,,, half*, , , i64) - 
-define @test_vloxseg5_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv64i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv64i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, 
v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64) @@ -84987,401 +13872,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + 
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i8(half* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - 
%1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64) @@ -85398,26 +13906,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half*, , i64) @@ -85436,104 +13941,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 
%vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64) @@ -85550,256 +13976,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - 
ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv64i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv64i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64) @@ -85816,412 +14011,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv1f16.nxv8i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg6_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, %1, %1, 
half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64) @@ -86238,11 +14046,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -86250,15 +14057,13 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i64(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half*, , i64) @@ -86277,11 +14082,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -86289,95 +14093,15 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i32(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64) @@ -86394,11 +14118,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -86406,251 +14129,15 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i16(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv64i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv64i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - 
%3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64) @@ -86667,11 +14154,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -86679,411 +14165,15 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i8(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; 
CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64) @@ -87100,28 +14190,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, 
%index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half*, , i64) @@ -87140,51 +14227,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -87193,57 +14239,15 @@ define @test_vloxseg8_mask_nxv1f16_nxv8i16(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv4i8(half* %base, 
%index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64) @@ -87260,270 +14264,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; 
CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, 
%index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv64i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv64i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64) @@ -87540,251 +14301,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i32(,,,,,,,, half*, , , i64) - 
-define @test_vloxseg8_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg8_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -87793,151 +14313,13 @@ define @test_vloxseg8_mask_nxv1f16_nxv2i64(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i16(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i16(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i32(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i8(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float*, , i64) @@ -87956,22 +14338,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call 
{,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float*, , i64) @@ -87990,92 +14368,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i16(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i8(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64) @@ -88092,228 +14398,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i32(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i8(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i64(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv64i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv64i8(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i16(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - 
ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i64(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64) @@ -88330,368 +14428,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i8(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i32(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i8(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i32(float*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i32(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i16(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i64(,, float*, , , i64) - -define @test_vloxseg2_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i64(float* %base, 
%index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64) @@ -88708,23 +14458,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float*, , i64) @@ -88743,95 +14490,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64) @@ -88848,235 
+14522,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv64i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv64i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; 
CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64) @@ -89093,379 +14554,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, 
%mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i32(,,, 
float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64) @@ -89482,24 +14586,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float*, , i64) @@ -89518,98 
+14619,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64) @@ -89626,242 +14652,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv64i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv64i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i16(,,,, float*, , , i64) - -define 
@test_vloxseg4_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64) @@ -89878,390 +14685,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg4_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv1f32.nxv2i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu 
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , 
, i64)
@@ -90278,25 +14718,22 @@ entry:
ret %1
}
-define @test_vloxseg5_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg5_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
}
declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float*, , i64)
@@ -90315,101 +14752,24 @@ entry:
ret %1
}
-define @test_vloxseg5_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) {
+define @test_vloxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i16(float*, , i64)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i16(,,,,, float*, , , i64)
-
-define @test_vloxseg5_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i16(float* %base, %index, i64 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl)
%1 = extractvalue {,,,,} %0, 1
ret %1
}
-define @test_vloxseg5_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64) @@ -90426,249 +14786,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define 
@test_vloxseg5_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i64(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 
%vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv64i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv64i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i64(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64) @@ -90685,401 +14820,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i32(,,,,, float*, , , i64) - 
-define @test_vloxseg5_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i8(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i8(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i32(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i32(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i16(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i16(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i64(float*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i64(,,,,, float*, , , i64) - -define @test_vloxseg5_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg6_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64) @@ -91096,26 +14854,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float*, , i64) @@ -91134,104 +14889,25 @@ entry: ret %1 } -define 
@test_vloxseg6_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: 
vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64) @@ -91248,256 +14924,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i64(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv64i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv64i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: 
vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i16(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i64(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64) @@ -91514,412 +14959,25 @@ entry: ret %1 } 
-define @test_vloxseg6_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: 
vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i8(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i8(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i32(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i32(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i16(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i16(,,,,,, float*, , , i64) - -define 
@test_vloxseg6_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i64(float*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i64(,,,,,, float*, , , i64) - -define @test_vloxseg6_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: 
vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i32( %1, %1, 
%1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64) @@ -91936,11 +14994,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -91948,15 +15005,13 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i64(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float*, , i64) @@ -91975,11 +15030,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -91987,95 +15041,15 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i32(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64) @@ -92092,11 +15066,10 @@ entry: ret %1 } -define 
@test_vloxseg7_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -92104,251 +15077,15 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i16(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i64(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv64i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv64i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i16(,,,,,,, float*, , , i64) - -define 
@test_vloxseg7_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i64(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64) @@ -92365,11 +15102,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -92377,411 +15113,15 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i8(float* %base, ,,,,,,} 
@llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i8(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i8(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i8(,,,,,,, float*, , , i64) - -define 
@test_vloxseg7_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i32(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i32(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i16(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i16(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i16: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i64(float*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i64(,,,,,,, float*, , , i64) - -define @test_vloxseg7_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i16(float* %base, 
%index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv16i8(float* 
%base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64) @@ -92798,28 +15138,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float*, , i64) @@ -92838,51 +15175,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -92891,57 +15187,15 @@ define @test_vloxseg8_mask_nxv1f32_nxv8i16(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64) @@ -92958,270 +15212,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i8(float*, , i64) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i64(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv64i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv64i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i64(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v 
v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64) @@ -93238,251 +15249,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i8(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i8(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i32(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i32(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv16i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i16(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i16(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i64(float*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i64(,,,,,,,, float*, , , i64) - -define @test_vloxseg8_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -93491,219 +15261,13 @@ define @test_vloxseg8_mask_nxv1f32_nxv2i64(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i16(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i16(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i32(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; 
CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i8(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i64(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i32(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv1i32(half* 
%base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half*, , i64) @@ -93722,126 +15286,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i8(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i16(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i32(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64) @@ -93858,126 +15316,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i64(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg2_nxv8f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv64i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv64i8(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i16(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64) @@ -93994,92 +15346,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i8(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i8(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64) @@ -94096,370 +15376,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i8(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i32(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; 
CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i16(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i64(,, half*, , , i64) - -define @test_vloxseg2_nxv8f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i8(,,, half*, , , i64) - -define 
@test_vloxseg3_nxv8f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i32(half* 
%base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64) @@ -94476,130 +15406,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; 
CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64) @@ -94616,130 +15438,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define 
@test_vloxseg3_mask_nxv8f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv64i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv64i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64) @@ -94756,95 +15470,21 @@ entry: ret %1 } -define 
@test_vloxseg3_mask_nxv8f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail 
call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64) @@ -94861,381 +15501,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: 
vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv8f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64) @@ -95252,134 +15532,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i16(,,,, half*, , 
, i64) - -define @test_vloxseg4_nxv8f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64) @@ -95396,134 +15565,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %1, %1, %1, %1, half* 
%base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv64i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv64i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - 
-define @test_vloxseg4_mask_nxv8f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64) @@ -95540,98 +15598,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8f16_nxv8i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv8f16.nxv2i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64) @@ -95648,374 +15630,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv8f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i16(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i16(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i32(float*, , i64) 
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i32(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv4i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i8(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i64(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i64(float* 
%base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i32(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64) @@ -96032,126 +15663,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i8(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv4i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; 
CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i16(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i32(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64) @@ -96168,126 +15693,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; 
CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i64(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv4i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv64i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv64i8(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i16(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv4i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, 
(a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64) @@ -96304,92 +15723,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i8(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i8(,, float*, , , i64) - -define 
@test_vloxseg2_nxv8f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64) @@ -96406,466 +15753,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i8(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i8( %1, %1, float* %base, %index, 
%mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i32(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i16(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i64(,, float*, , , i64) - -define @test_vloxseg2_nxv8f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v12, (a0), 
v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i16(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i16(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i32(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv4i32(double* %base, %index, i64 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i8(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i64(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv1i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i32(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv1i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v 
v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i16(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i8(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i16(double*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i16(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv1i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64) @@ -96882,228 +15783,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i8(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call 
{,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i64(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv64i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv64i8(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv64i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i16(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i16(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: 
vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i64(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i64(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i8(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv1i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64) @@ -97120,126 +15813,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli 
a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i32(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i8(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i8(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i8( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i32(double*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i32(,, double*, , , i64) - -define @test_vloxseg2_nxv2f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; 
CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i32( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64) @@ -97256,22 +15843,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double*, , i64) @@ -97290,339 +15873,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64( %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i64(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv1i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv1i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv1i32(double* %base, %index, i64 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv1i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg3_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64) @@ -97639,235 +15903,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i8( %1, %1, %1, double* %base, %index, %mask, i64 
%vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i64(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv64i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv64i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv64i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i16(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i16(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; 
CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i64(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i64(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv1i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64) @@ -97884,130 +15935,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i32(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i8(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i8(,,, double*, , , i64) - -define @test_vloxseg3_nxv2f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i32(double*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i32(,,, double*, , , i64) - -define 
@test_vloxseg3_nxv2f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64) @@ -98024,23 +15967,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double*, , i64) @@ -98059,349 +15999,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i16(double*, , i64) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv16i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv32i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv4i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv16i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i64(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv1i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i32(,,,, double*, , , i64) - -define 
@test_vloxseg4_nxv2f64_nxv1i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv8i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv4i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; 
CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv1i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64) @@ -98418,242 +16031,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv8i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i8: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i64(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv4i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv64i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv64i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv64i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv64i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i16(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i16(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv4i16(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i64(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i64(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv8i64(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv1i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; 
CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64) @@ -98670,134 +16064,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv8i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i8(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i8(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv32i8(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i32(double*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i32(,,,, double*, , , i64) - -define @test_vloxseg4_nxv2f64_nxv16i32(double* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64) @@ -98814,24 +16097,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double*, , i64) @@ -98850,92 +16130,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i16(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i16(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half*, , i64) @@ -98954,160 +16163,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i8(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i64(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i32(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i16(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64) @@ -99124,126 +16193,20 @@ entry: ret %1 } -define 
@test_vloxseg2_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i16(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i32(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv4f16.nxv8i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i8(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64) @@ -99260,58 +16223,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv64i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv64i8(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - 
%2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64) @@ -99328,366 +16253,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i64(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i8(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i8(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i32(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i8(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i32(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i16(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i64(,, half*, , , i64) - -define @test_vloxseg2_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half*, , i64) declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64) @@ -99704,165 +16283,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64) @@ -99879,130 +16314,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v 
v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 
= tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64) @@ -100019,60 +16346,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv64i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv64i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half*, , i64) declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64) @@ -100089,377 +16377,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} 
%0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: 
vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, 
%mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64) @@ -100476,170 +16409,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: 
vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64) @@ -100656,134 +16442,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv4f16.nxv8i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64) @@ -100800,62 +16475,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv64i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv64i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64) @@ -100872,388 +16507,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 
- ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64) @@ -101270,175 +16540,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64) @@ -101455,138 +16574,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - 
%3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64) @@ -101603,64 +16608,23 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 
+; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv64i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv64i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64) @@ -101677,399 +16641,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i64: -; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64) @@ -102086,180 +16675,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) 
{ ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64) @@ -102276,142 +16710,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg6_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64) @@ -102428,66 +16745,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv64i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv64i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, 
%index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64) @@ -102504,410 +16780,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue 
{,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64) @@ -102924,11 +16815,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -102936,173 +16826,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i32(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4f16.nxv16i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64) @@ -103119,11 +16851,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -103131,134 +16862,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i8(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4f16.nxv1i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64) @@ -103275,11 +16887,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -103287,56 +16898,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i64(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv64i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv64i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, 
half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64) @@ -103353,11 +16923,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -103365,409 +16934,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i16(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = 
extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: 
vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64) @@ -103784,190 +16959,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4f16.nxv16i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; 
CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64) @@ -103984,150 +16996,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, 
v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64) @@ -104144,51 +17033,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv64i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv64i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vmv1r.v v17, v16 ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 @@ -104197,15 +17045,13 @@ define @test_vloxseg8_mask_nxv4f16_nxv64i8(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half*, , i64) @@ -104224,331 +17070,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i64(half* %base, %index, i64 
%vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -104557,321 +17082,13 @@ define @test_vloxseg8_mask_nxv4f16_nxv2i64(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i64( %1, %1, 
%1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i16(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i16(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i32(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; 
CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i8(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i64(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i32(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i16(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i8(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i16(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half*, , i64) @@ -104890,228 +17107,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i8(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i64(half*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i64(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv64i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv64i8(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i16(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i16(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue 
{,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i64(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i64(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i8(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64) @@ -105128,126 +17137,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i32(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i8(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i8(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i32(half*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i32(,, half*, , , i64) - -define @test_vloxseg2_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64) @@ -105264,22 +17167,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half*, , i64) @@ -105298,339 +17197,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, 
i64 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i8(,,, half*, , 
, i64) - -define @test_vloxseg3_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64) @@ -105647,235 +17227,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv64i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv64i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i16(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i16(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} 
@llvm.riscv.vloxseg3.nxv2f16.nxv8i64(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i64(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64) @@ -105892,130 +17259,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i8(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i8(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i32(half*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i32(,,, half*, , , i64) - -define @test_vloxseg3_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64) @@ -106032,23 +17291,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half*, , i64) @@ -106067,349 +17323,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 + 
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i32( 
%1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg4_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64) @@ -106426,242 +17354,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail 
call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv64i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv64i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i16(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i16(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i64(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i64(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64) @@ -106678,134 +17387,23 @@ entry: ret %1 
} -define @test_vloxseg4_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i8(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i8(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i8(half* %base, %index, i64 
%vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i32(half*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i32(,,,, half*, , , i64) - -define @test_vloxseg4_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64) @@ -106822,24 +17420,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half*, , i64) @@ -106858,359 +17453,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, 
v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg5_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; 
CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64) @@ -107227,249 +17486,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue 
{,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv64i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv64i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg5_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i16(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i16(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i64(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i64(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 
= extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64) @@ -107486,138 +17520,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i8(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i8(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i32(half*, , i64) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i32(,,,,, half*, , , i64) - -define @test_vloxseg5_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64) @@ -107634,25 +17554,22 @@ entry: ret %1 } -define 
@test_vloxseg5_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half*, , i64) @@ -107671,369 +17588,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64) @@ -108050,256 +17622,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v 
v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv64i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv64i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i16(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i16(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; 
CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i64(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i64(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; 
CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64) @@ -108316,142 +17657,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i8(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i8(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 
-; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i32(half*, , i64) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i32(,,,,,, half*, , , i64) - -define @test_vloxseg6_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64) @@ -108468,26 +17692,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half*, , i64) @@ -108506,379 +17727,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg6_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v 
v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i16(half*, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64) @@ -108895,11 +17762,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -108907,251 +17773,15 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i32(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i64(half*, , i64) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv64i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv64i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i16(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i16(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i64(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i64(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64) @@ -109168,11 +17798,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -109180,134 +17809,15 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i8(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i8(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i8(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i32(half*, , i64) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i32(,,,,,,, half*, , , i64) - -define @test_vloxseg7_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64) @@ -109324,11 +17834,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -109336,15 +17845,13 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i16(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half*, , i64) @@ -109363,11 +17870,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -109375,377 +17881,15 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i64(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: 
vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail 
call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i16(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu 
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64) @@ -109762,270 +17906,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: 
vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv64i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv64i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i16(half*, , 
i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i16(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i64(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i64(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = 
extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64) @@ -110042,150 +17943,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; 
CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i8(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i8(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i32(half*, , i64) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i32(,,,,,,,, half*, , , i64) - -define @test_vloxseg8_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} 
%0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64) @@ -110202,28 +17980,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half*, , i64) @@ -110242,96 +18017,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i16(float*, , i64) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i16(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i16(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float*, , i64) @@ -110350,160 +18054,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue 
{,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i8(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i64(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i32(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i16(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64) @@ -110520,126 +18084,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i16(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue 
{,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i32(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i8(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64) @@ -110656,58 +18114,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +define 
@test_vloxseg2_mask_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv64i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv64i8(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64) @@ -110724,366 +18144,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i64(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv8i64(float* %base, %index, i64 %vl) { -; 
CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i8(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i8(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i32(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i8(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i8(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i8( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i32(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i32(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i16(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i16(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i64(float*, , i64) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i64(,, float*, , , i64) - -define @test_vloxseg2_nxv4f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg3_mask_nxv4f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64) @@ -111100,165 +18174,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, 
v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i32( %1, 
%1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64) @@ -111275,130 +18206,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v 
v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64) @@ -111415,60 +18238,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 
+; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv64i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv64i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64) @@ -111485,377 +18269,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i64 
%vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i8(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i8(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i32(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i32(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v 
v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i16(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i16(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i64(float*, , i64) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i64(,,, float*, , , i64) - -define @test_vloxseg3_nxv4f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv16i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv32i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64) @@ -111872,170 +18301,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - 
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv16i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv1i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv1i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg4_mask_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv8i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64) @@ -112052,134 +18334,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv1i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: 
test_vloxseg4_nxv4f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv2i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv8i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli 
a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64) @@ -112196,60 +18367,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv64i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv64i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv64i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float*, , i64) @@ -112268,311 +18400,20 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: 
vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv8i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv1i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv2i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv8i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i8(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i8(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv32i8(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i32(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i32(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv16i32(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i16(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i16(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv2i16(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i64(float*, , i64) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i64(,,,, float*, , , i64) - -define @test_vloxseg4_nxv4f32_nxv2i64(float* %base, %index, i64 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v 
v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} -